printk("Error: %s unknown port size\n", __FUNCTION__);
domain_crash_synchronous();
}
- TRACE_VMEXIT(3, regs->eax);
}
}
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
+#include <asm/hvm/trace.h>
/*
* Most of this code is copied from vmx_io.c and modified
if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )
{
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+ HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */
return;
}
if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) )
++pt->pending_intr_nr;
/* let's inject this interrupt */
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, intr_vector, 0);
+ if (re_injecting)
+ HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
+ else
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
svm_inject_extint(v, intr_vector);
break;
case APIC_DM_SMI:
#include <asm/x86_emulate.h>
#include <public/sched.h>
#include <asm/hvm/vpt.h>
+#include <asm/hvm/trace.h>
#define SVM_EXTRA_DEBUG
eventinj_t event;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ if ( trap == TRAP_page_fault )
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
+ else
+ HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+
event.bytes = 0;
event.fields.v = 1;
event.fields.type = EVENTTYPE_EXCEPTION;
unsigned int trapnr, int errcode, unsigned long cr2)
{
struct vcpu *v = current;
- svm_inject_exception(v, trapnr, (errcode != -1), errcode);
if ( trapnr == TRAP_page_fault )
v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
+ svm_inject_exception(v, trapnr, (errcode != -1), errcode);
}
static int svm_event_injection_faulted(struct vcpu *v)
regs->ecx = (unsigned long)ecx;
regs->edx = (unsigned long)edx;
+ HVMTRACE_3D(CPUID, v, input,
+ ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
+
inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
ASSERT(inst_len > 0);
__update_guest_eip(vmcb, inst_len);
}
-static inline unsigned long *get_reg_p(unsigned int gpreg,
- struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
+static inline unsigned long *get_reg_p(
+ unsigned int gpreg,
+ struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
unsigned long *reg_p = NULL;
switch (gpreg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ HVMTRACE_0D(DR_WRITE, v);
+
v->arch.hvm_vcpu.flag_dr_dirty = 1;
__restore_debug_registers(v);
else
size = 1;
+ if (dir==IOREQ_READ)
+ HVMTRACE_2D(IO_READ, v, port, size);
+ else
+ HVMTRACE_2D(IO_WRITE, v, port, size);
+
HVM_DBG_LOG(DBG_LEVEL_IO,
"svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
"exit_qualification = %"PRIx64,
return;
}
+ HVMTRACE_2D(CR_READ, v, cr, value);
+
set_reg(gp, value, regs, vmcb);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
value = get_reg(gpreg, regs, vmcb);
+ HVMTRACE_2D(CR_WRITE, v, cr, value);
+
HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
regs->edx = msr_content >> 32;
done:
+ HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
{
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
+
switch (ecx)
{
case MSR_IA32_TIME_STAMP_COUNTER:
/* Check for interrupt not handled or new interrupt. */
if ( (vmcb->rflags & X86_EFLAGS_IF) &&
- (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
+ (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) ) {
+ HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
return;
+ }
+ HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
hvm_hlt(vmcb->rflags);
}
__update_guest_eip (vmcb, inst_len);
}
+ HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
+
paging_invlpg(v, g_vaddr);
}
inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
+ HVMTRACE_1D(VMMCALL, v, regs->eax);
+
if ( regs->eax & 0x80000000 )
{
/* VMMCALL sanity check */
unsigned int exit_reason;
unsigned long eip;
struct vcpu *v = current;
- int error;
int do_debug = 0;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
exit_reason = vmcb->exitcode;
save_svm_cpu_user_regs(v, regs);
+ HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
+
if (exit_reason == VMEXIT_INVALID)
{
svm_dump_vmcb(__func__, vmcb);
}
#endif /* SVM_EXTRA_DEBUG */
- TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
-
switch (exit_reason)
{
case VMEXIT_EXCEPTION_DB:
break;
case VMEXIT_INTR:
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(INTR, v);
+ break;
case VMEXIT_NMI:
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(NMI, v);
+ break;
case VMEXIT_SMI:
- /* Asynchronous events, handled when we STGI'd after the VMEXIT. */
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(SMI, v);
break;
case VMEXIT_INIT:
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
- if (!(error = svm_do_page_fault(va, regs)))
+ if ( svm_do_page_fault(va, regs) )
{
- /* Inject #PG using Interruption-Information Fields */
- svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
-
- v->arch.hvm_svm.cpu_cr2 = va;
- vmcb->cr2 = va;
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
- VMEXIT_EXCEPTION_PF, va);
+ HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
+ break;
}
+
+ v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
+ svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
break;
}
{
struct vcpu *v = current;
+ // this is the last C code before the VMRUN instruction
+ HVMTRACE_0D(VMENTRY, v);
+
local_irq_disable();
asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
}
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <public/hvm/ioreq.h>
+#include <asm/hvm/trace.h>
static inline void
case APIC_DM_EXTINT:
case APIC_DM_FIXED:
case APIC_DM_LOWEST:
+ HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0);
vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, highest_vector, 0);
break;
case APIC_DM_SMI:
#include <asm/x86_emulate.h>
#include <asm/hvm/vpt.h>
#include <public/hvm/save.h>
+#include <asm/hvm/trace.h>
static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);
result = paging_fault(va, regs);
- TRACE_VMEXIT(2, result);
#if 0
if ( !result )
{
regs->ebx = (unsigned long)ebx;
regs->ecx = (unsigned long)ecx;
regs->edx = (unsigned long)edx;
+
+ HVMTRACE_3D(CPUID, current, input,
+ ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
}
#define CASE_GET_REG_P(REG, reg) \
{
struct vcpu *v = current;
+ HVMTRACE_0D(DR_WRITE, v);
+
v->arch.hvm_vcpu.flag_dr_dirty = 1;
/* We could probably be smarter about this */
unsigned long eip;
struct vcpu *v = current;
+ HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
+
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
else
port = regs->edx & 0xffff;
- TRACE_VMEXIT(1, port);
-
size = (exit_qualification & 7) + 1;
dir = test_bit(3, &exit_qualification); /* direction */
+ if (dir==IOREQ_READ)
+ HVMTRACE_2D(IO_READ, current, port, size);
+ else
+ HVMTRACE_2D(IO_WRITE, current, port, size);
+
if ( test_bit(4, &exit_qualification) ) { /* string instruction */
unsigned long addr, count = 1, base;
paddr_t paddr;
if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
hvm_print_line(current, regs->eax); /* guest debug output */
- if ( dir == IOREQ_WRITE )
- TRACE_VMEXIT(2, regs->eax);
-
regs->eip += inst_len;
send_pio_req(port, 1, size, regs->eax, dir, df, 0);
}
goto exit_and_crash;
}
- TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
- TRACE_VMEXIT(2, cr);
- TRACE_VMEXIT(3, value);
+ HVMTRACE_2D(CR_WRITE, v, cr, value);
HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
break;
}
- TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
- TRACE_VMEXIT(2, cr);
- TRACE_VMEXIT(3, value);
+ HVMTRACE_2D(CR_READ, v, cr, value);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
- TRACE_VMEXIT(1, TYPE_CLTS);
+// TRACE_VMEXIT(1, TYPE_CLTS);
/* We initialise the FPU now, to avoid needing another vmexit. */
setup_fpu(v);
value = v->arch.hvm_vmx.cpu_shadow_cr0;
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
- TRACE_VMEXIT(1, TYPE_LMSW);
- TRACE_VMEXIT(2, value);
+// TRACE_VMEXIT(1, TYPE_LMSW);
+// TRACE_VMEXIT(2, value);
return vmx_set_cr0(value);
break;
default:
regs->edx = msr_content >> 32;
done:
+ HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax,
(unsigned long)regs->edx);
ecx, (u32)regs->eax, (u32)regs->edx);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
switch (ecx) {
case MSR_IA32_TIME_STAMP_COUNTER:
static void vmx_do_hlt(void)
{
unsigned long rflags;
+ HVMTRACE_0D(HLT, current);
rflags = __vmread(GUEST_RFLAGS);
hvm_hlt(rflags);
}
BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
- TRACE_VMEXIT(1, vector);
+ HVMTRACE_1D(INTR, current, vector);
switch(vector) {
case LOCAL_TIMER_VECTOR:
unsigned long exit_qualification, inst_len = 0;
struct vcpu *v = current;
- TRACE_3D(TRC_VMX_VMEXIT + v->vcpu_id, 0, 0, 0);
-
exit_reason = __vmread(VM_EXIT_REASON);
+ HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
+
perfc_incra(vmexits, exit_reason);
- TRACE_VMEXIT(0, exit_reason);
if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
local_irq_enable();
vector = intr_info & INTR_INFO_VECTOR_MASK;
- TRACE_VMEXIT(1, vector);
perfc_incra(cause_vector, vector);
switch ( vector )
exit_qualification = __vmread(EXIT_QUALIFICATION);
regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
- TRACE_VMEXIT(3, regs->error_code);
- TRACE_VMEXIT(4, exit_qualification);
-
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
(unsigned long)regs->eax, (unsigned long)regs->ebx,
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
- if ( !vmx_do_page_fault(exit_qualification, regs) )
+ if ( vmx_do_page_fault(exit_qualification, regs) )
{
- /* Inject #PG using Interruption-Information Fields. */
- vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
- v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
- TRAP_page_fault, exit_qualification);
+ HVMTRACE_2D(PF_XEN, v, exit_qualification, regs->error_code);
+ break;
}
+
+ v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
+ vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
break;
}
case TRAP_nmi:
+ HVMTRACE_0D(NMI, v);
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
else
__update_guest_eip(inst_len);
exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_do_invlpg(exit_qualification);
- TRACE_VMEXIT(4, exit_qualification);
break;
}
case EXIT_REASON_VMCALL:
{
+ HVMTRACE_1D(VMMCALL, v, regs->eax);
inst_len = __get_instruction_length(); /* Safe: VMCALL */
__update_guest_eip(inst_len);
hvm_do_hypercall(regs);
inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
if ( vmx_cr_access(exit_qualification, regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(4, exit_qualification);
break;
}
case EXIT_REASON_DR_ACCESS:
exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
vmx_io_instruction(exit_qualification, inst_len);
- TRACE_VMEXIT(4, exit_qualification);
break;
case EXIT_REASON_MSR_READ:
inst_len = __get_instruction_length(); /* Safe: RDMSR */
if ( vmx_do_msr_read(regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(1, regs->ecx);
- TRACE_VMEXIT(2, regs->eax);
- TRACE_VMEXIT(3, regs->edx);
break;
case EXIT_REASON_MSR_WRITE:
inst_len = __get_instruction_length(); /* Safe: WRMSR */
if ( vmx_do_msr_write(regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(1, regs->ecx);
- TRACE_VMEXIT(2, regs->eax);
- TRACE_VMEXIT(3, regs->edx);
break;
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
asmlinkage void vmx_trace_vmentry(void)
{
struct vcpu *v = current;
- TRACE_5D(TRC_VMX_VMENTRY + current->vcpu_id,
- v->arch.hvm_vcpu.hvm_trace_values[0],
- v->arch.hvm_vcpu.hvm_trace_values[1],
- v->arch.hvm_vcpu.hvm_trace_values[2],
- v->arch.hvm_vcpu.hvm_trace_values[3],
- v->arch.hvm_vcpu.hvm_trace_values[4]);
-
- TRACE_VMEXIT(0, 0);
- TRACE_VMEXIT(1, 0);
- TRACE_VMEXIT(2, 0);
- TRACE_VMEXIT(3, 0);
- TRACE_VMEXIT(4, 0);
+ HVMTRACE_0D(VMENTRY, v);
}
/*
/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
-int tb_init_done;
+int tb_init_done __read_mostly;
/* which CPUs tracing is enabled on */
static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
#define HVM_DBG_LOG(level, _f, _a...)
#endif
-#define TRACE_VMEXIT(index, value) \
- current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
-
/*
* Save/restore support
*/
/* Flags */
int flag_dr_dirty;
- unsigned long hvm_trace_values[5];
-
union {
struct arch_vmx_struct vmx;
struct arch_svm_struct svm;
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>
+#include <asm/hvm/trace.h>
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_vmentry(void);
__vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
+
+ if (trap == TRAP_page_fault)
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
+ else
+ HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}
static inline void vmx_inject_hw_exception(
#define TRC_GEN 0x0001f000 /* General trace */
#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
-#define TRC_VMX 0x0008f000 /* Xen VMX trace */
+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
#define TRC_MEM 0x0010f000 /* Xen memory trace */
#define TRC_ALL 0xfffff000
/* Trace subclasses */
#define TRC_SUBCLS_SHIFT 12
-/* trace subclasses for VMX */
-#define TRC_VMXEXIT 0x00081000 /* VMX exit trace */
-#define TRC_VMXENTRY 0x00082000 /* VMX exit trace */
-#define TRC_VMXINTR 0x00084000 /* VMX interrupt trace */
+/* trace subclasses for HVM */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
/* Trace events per class */
#define TRC_LOST_RECORDS (TRC_GEN + 1)
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
/* trace events per subclass */
-#define TRC_VMX_VMEXIT (TRC_VMXEXIT + 1)
-#define TRC_VMX_VMENTRY (TRC_VMXENTRY + 1)
-#define TRC_VMX_INTR (TRC_VMXINTR + 1)
-
+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
/* This structure represents a single trace buffer record. */
struct t_rec {